I want a standard notebook to check that the emulators I've made are working as expected, since I've been implementing a few different AB models.
In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
In [2]:
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
In [3]:
#training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_fscab_emulator/'
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_corrab_emulator/'
em_method = 'gp'
split_method = 'random'
In [4]:
a = 1.0
z = 1.0/a - 1.0
In [5]:
fixed_params = {'z':z}#, 'r':0.18477483}
In [6]:
emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)
In [ ]:
emu._ordered_params
In [ ]:
print emu.y
In [ ]:
metric = emu._get_initial_guess(None)
In [ ]:
metric
In [ ]:
new_hp = np.array([80.75541473, 179.90950523, 66.97808312, 89.17850468,
                   225.64738711, 21.02835102])
new_hp_pnames = ['mean_occupation_centrals_assembias_param1',
'mean_occupation_centrals_assembias_slope1',
'mean_occupation_centrals_assembias_split1',
'mean_occupation_satellites_assembias_param1',
'mean_occupation_satellites_assembias_slope1',
'mean_occupation_satellites_assembias_split1']
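As a quick sanity check, pair each proposed hyperparameter with its name. This assumes the ordering of new_hp matches new_hp_pnames above.
In [ ]:
# Pair each proposed hyperparameter with its name (assumes the orderings
# of new_hp and new_hp_pnames agree).
for pname, hp in zip(new_hp_pnames, new_hp):
    print pname, hp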
In [ ]:
from pearce.mocks import compute_prim_haloprop_bins, cat_dict
In [ ]:
cosmo_params = {'simname':'chinchilla', 'Lbox':400.0, 'scale_factors':[a]}
cat = cat_dict[cosmo_params['simname']](**cosmo_params)  # construct the specified catalog!
cat.load_catalog(a)
#halo_masses = cat.halocat.halo_table['halo_mvir']
In [ ]:
cat.load_model(a, 'corrRedMagic')
In [ ]:
r_bins = np.logspace(-1, 1.6, 12)  # 11 log-spaced radial bins
rpoints = (r_bins[1:] + r_bins[:-1]) / 2  # bin centers
In [ ]:
rpoints
In [ ]:
fig = plt.figure(figsize=(10,8))
i = 50
emulation_point = [('f_c', 0.233), ('logM0', 12.0), ('sigma_logM', 0.333),
                   ('alpha', 1.053), ('logM1', 13.5), ('logMmin', 12.033)]
em_params = dict(emulation_point)
em_params.update(fixed_params)
del em_params['z']
ab_params = {'mean_occupation_satellites_assembias_param1': 0.0,
             'mean_occupation_centrals_assembias_param1': 0.0,
             'mean_occupation_satellites_assembias_slope1': 5.0,
             'mean_occupation_centrals_assembias_slope1': 5.0,
             'mean_occupation_satellites_assembias_split1': 0.5,
             'mean_occupation_centrals_assembias_split1': 0.5}
em_params.update(ab_params)
# Override the hand-picked point above with training point i, so the emulator
# is evaluated where the truth is known. All but the last ordered parameter
# are read off the training inputs.
em_params = {key: emu.x[i*len(emu.scale_bin_centers), idx]
             for idx, key in enumerate(emu._ordered_params.keys()[:-1])}
key = 'mean_occupation_centrals_assembias_param1'
print em_params[key]
em_params[key] = 0.0
wp = emu.emulate_wrt_r(em_params, rpoints)[0]
print wp
plt.plot(rpoints, wp, label='Emulator')
plt.plot(emu.scale_bin_centers,
         emu.y[i*len(emu.scale_bin_centers):(i+1)*len(emu.scale_bin_centers)] + emu.y_hat,
         label='Training point')
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*emu.y_hat,
         label='Mean')
plt.xscale('log')
plt.legend(loc='best')
#plt.xlim([0.1, 4])
#plt.title(r'$w(\theta)$ variance by %s'%param)
#plt.xlabel(r'$\theta$')
#plt.ylabel(r'$w(\theta)$')
plt.show()
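A residual check on the same training point makes agreement easier to judge than overplotted curves. This is a sketch that assumes emulate_wrt_r can be evaluated directly at the training bin centers.
In [ ]:
# Fractional difference between the emulator and training point i.
# Sketch: assumes emulate_wrt_r accepts the training bin centers as r values.
truth = emu.y[i*len(emu.scale_bin_centers):(i+1)*len(emu.scale_bin_centers)] + emu.y_hat
pred = emu.emulate_wrt_r(em_params, emu.scale_bin_centers)[0]
plt.plot(emu.scale_bin_centers, (pred - truth)/truth)
plt.xscale('log')
plt.ylabel('Fractional difference')
plt.show()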
In [ ]:
import scipy.optimize as op
from itertools import izip
In [ ]:
def nll(p):
    # Update the kernel parameters and compute the likelihood.
    # params are log(a) and log(m)
    #ll = 0
    #for emulator, _y in izip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    ll += emulator.lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    print p
    ll = emu._emulator.lnlikelihood(emu.y, quiet=False)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
def grad_nll(p):
    # Update the kernel parameters and compute the gradient of the likelihood.
    #gll = 0
    #for emulator, _y in izip(self._emulators, self.y):
    #    emulator.kernel[:] = p
    #    gll += emulator.grad_lnlikelihood(_y, quiet=True)
    emu._emulator.kernel[:] = p
    gll = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -gll
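Before optimizing, it's worth evaluating the objective at the current kernel vector; it should simply be minus the current log likelihood.
In [ ]:
# Sanity check: nll at the current kernel vector should equal -lnlikelihood.
print nll(emu._emulator.kernel.vector)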
In [ ]:
emu.goodness_of_fit(training_dir)
In [ ]:
p0 = emu._emulator.kernel.vector
In [ ]:
# Random restart: overwrite the current kernel vector with random log-hyperparameters.
p0 = np.log(np.random.rand(emu._emulator.kernel.vector.shape[0]))
results = op.minimize(nll, p0, jac=grad_nll)
In [ ]:
print results.x
print results.success
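If the optimizer converged, the best-fit vector can be pushed back into the emulator before re-checking the fit. A sketch, using the same kernel-vector convention as nll above.
In [ ]:
# Apply the best-fit hyperparameters to the GP (same convention as nll).
if results.success:
    emu._emulator.kernel[:] = results.x
    print emu._emulator.lnlikelihood(emu.y)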
In [ ]:
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
In [ ]:
def my_loss_func(y_true, y_pred):
    # Placeholder loss (the original cell was left unfinished): mean squared
    # error between the training outputs and the emulator predictions.
    return np.mean((y_true - y_pred)**2)
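A minimal sketch of how this loss could drive a randomized hyperparameter search. sklearn's GaussianProcessRegressor is a hypothetical stand-in here, since pearce's emulator doesn't expose the sklearn estimator interface natively.
In [ ]:
# Sketch: wire the custom loss into RandomizedSearchCV via make_scorer.
# GaussianProcessRegressor and the 'alpha' grid are stand-in assumptions.
from sklearn.gaussian_process import GaussianProcessRegressor
scorer = make_scorer(my_loss_func, greater_is_better=False)
param_dist = {'alpha': np.logspace(-12, -2, 100)}
search = RandomizedSearchCV(GaussianProcessRegressor(), param_dist,
                            n_iter=10, scoring=scorer)
#search.fit(emu.x, emu.y)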
In [ ]: